endif
ifeq (${WORKAROUND_CVE_2017_5715},1)
-BL31_SOURCES += lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S \
- lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+BL31_SOURCES += lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S \
+ lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
endif
BL31_LINKERFILE := bl31/bl31.ld.S
endif
ifeq (${WORKAROUND_CVE_2017_5715},1)
-BL32_SOURCES += bl32/sp_min/workaround_cve_2017_5715_bpiall.S \
- bl32/sp_min/workaround_cve_2017_5715_icache_inv.S
+BL32_SOURCES += bl32/sp_min/wa_cve_2017_5715_bpiall.S \
+ bl32/sp_min/wa_cve_2017_5715_icache_inv.S
endif
BL32_LINKERFILE := bl32/sp_min/sp_min.ld.S
--- /dev/null
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl wa_cve_2017_5715_bpiall_vbar
+
+vector_base wa_cve_2017_5715_bpiall_vbar
+ /* We encode the exception entry in the bottom 3 bits of SP */
+ add sp, sp, #1 /* Reset: 0b111 */
+ add sp, sp, #1 /* Undef: 0b110 */
+ add sp, sp, #1 /* Syscall: 0b101 */
+ add sp, sp, #1 /* Prefetch abort: 0b100 */
+ add sp, sp, #1 /* Data abort: 0b011 */
+ add sp, sp, #1 /* Reserved: 0b010 */
+ add sp, sp, #1 /* IRQ: 0b001 */
+ nop /* FIQ: 0b000 */
+
+ /*
+ * Invalidate the branch predictor, `r0` is a dummy register
+ * and is unused.
+ */
+ stcopr r0, BPIALL
+ isb
+
+	/*
+	 * Since we cannot use any temporary registers and cannot
+	 * clobber SP, decode the exception entry using an unrolled
+	 * binary search on the low bits of SP.
+	 *
+	 * Note: if this code is reused by other secure payloads,
+	 * the exception entry vectors below must be changed to the
+	 * vectors specific to that secure payload.
+	 */
+
+ tst sp, #4
+ bne 1f
+
+ tst sp, #2
+ bne 3f
+
+ /* Expected encoding: 0x1 and 0x0 */
+ tst sp, #1
+ /* Restore original value of SP by clearing the bottom 3 bits */
+ bic sp, sp, #0x7
+ bne plat_panic_handler /* IRQ */
+ b sp_min_handle_fiq /* FIQ */
+
+1:
+ tst sp, #2
+ bne 2f
+
+ /* Expected encoding: 0x4 and 0x5 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne sp_min_handle_smc /* Syscall */
+ b plat_panic_handler /* Prefetch abort */
+
+2:
+ /* Expected encoding: 0x7 and 0x6 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne sp_min_entrypoint /* Reset */
+ b plat_panic_handler /* Undef */
+
+3:
+ /* Expected encoding: 0x2 and 0x3 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne plat_panic_handler /* Data abort */
+ b plat_panic_handler /* Reserved */
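+
+	/*
+	 * For reference, the decode tree above is equivalent to this C
+	 * sketch (the entry code is SP & 0x7; handler names as above):
+	 *
+	 *	switch (sp & 0x7) {
+	 *	case 0x7: sp_min_entrypoint();  break;	// Reset
+	 *	case 0x5: sp_min_handle_smc();  break;	// Syscall
+	 *	case 0x0: sp_min_handle_fiq();  break;	// FIQ
+	 *	default:  plat_panic_handler(); break;	// all others
+	 *	}
+	 */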
--- /dev/null
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl wa_cve_2017_5715_icache_inv_vbar
+
+vector_base wa_cve_2017_5715_icache_inv_vbar
+ /* We encode the exception entry in the bottom 3 bits of SP */
+ add sp, sp, #1 /* Reset: 0b111 */
+ add sp, sp, #1 /* Undef: 0b110 */
+ add sp, sp, #1 /* Syscall: 0b101 */
+ add sp, sp, #1 /* Prefetch abort: 0b100 */
+ add sp, sp, #1 /* Data abort: 0b011 */
+ add sp, sp, #1 /* Reserved: 0b010 */
+ add sp, sp, #1 /* IRQ: 0b001 */
+ nop /* FIQ: 0b000 */
+
+	/*
+	 * Invalidate the instruction cache, which we assume also
+	 * invalidates the branch predictor. This may depend on
+	 * other CPU-specific changes (e.g. an ACTLR setting).
+	 */
+ stcopr r0, ICIALLU
+ isb
+
+	/*
+	 * Since we cannot use any temporary registers and cannot
+	 * clobber SP, decode the exception entry using an unrolled
+	 * binary search on the low bits of SP.
+	 *
+	 * Note: if this code is reused by other secure payloads,
+	 * the exception entry vectors below must be changed to the
+	 * vectors specific to that secure payload.
+	 */
+
+ tst sp, #4
+ bne 1f
+
+ tst sp, #2
+ bne 3f
+
+ /* Expected encoding: 0x1 and 0x0 */
+ tst sp, #1
+ /* Restore original value of SP by clearing the bottom 3 bits */
+ bic sp, sp, #0x7
+ bne plat_panic_handler /* IRQ */
+ b sp_min_handle_fiq /* FIQ */
+
+1:
+	tst sp, #2
+	bne 2f
+
+	/* Expected encoding: 0x4 and 0x5 */
+	tst sp, #1
+ bic sp, sp, #0x7
+ bne sp_min_handle_smc /* Syscall */
+ b plat_panic_handler /* Prefetch abort */
+
+2:
+ /* Expected encoding: 0x7 and 0x6 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne sp_min_entrypoint /* Reset */
+ b plat_panic_handler /* Undef */
+
+3:
+ /* Expected encoding: 0x2 and 0x3 */
+ tst sp, #1
+ bic sp, sp, #0x7
+ bne plat_panic_handler /* Data abort */
+ b plat_panic_handler /* Reserved */
+++ /dev/null
-/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <asm_macros.S>
-
- .globl workaround_bpiall_runtime_exceptions
-
-vector_base workaround_bpiall_runtime_exceptions
- /* We encode the exception entry in the bottom 3 bits of SP */
- add sp, sp, #1 /* Reset: 0b111 */
- add sp, sp, #1 /* Undef: 0b110 */
- add sp, sp, #1 /* Syscall: 0b101 */
- add sp, sp, #1 /* Prefetch abort: 0b100 */
- add sp, sp, #1 /* Data abort: 0b011 */
- add sp, sp, #1 /* Reserved: 0b010 */
- add sp, sp, #1 /* IRQ: 0b001 */
- nop /* FIQ: 0b000 */
-
- /*
- * Invalidate the branch predictor, `r0` is a dummy register
- * and is unused.
- */
- stcopr r0, BPIALL
- isb
-
- /*
- * As we cannot use any temporary registers and cannot
- * clobber SP, we can decode the exception entry using
- * an unrolled binary search.
- *
- * Note, if this code is re-used by other secure payloads,
- * the below exception entry vectors must be changed to
- * the vectors specific to that secure payload.
- */
-
- tst sp, #4
- bne 1f
-
- tst sp, #2
- bne 3f
-
- /* Expected encoding: 0x1 and 0x0 */
- tst sp, #1
- /* Restore original value of SP by clearing the bottom 3 bits */
- bic sp, sp, #0x7
- bne plat_panic_handler /* IRQ */
- b sp_min_handle_fiq /* FIQ */
-
-1:
- tst sp, #2
- bne 2f
-
- /* Expected encoding: 0x4 and 0x5 */
- tst sp, #1
- bic sp, sp, #0x7
- bne sp_min_handle_smc /* Syscall */
- b plat_panic_handler /* Prefetch abort */
-
-2:
- /* Expected encoding: 0x7 and 0x6 */
- tst sp, #1
- bic sp, sp, #0x7
- bne sp_min_entrypoint /* Reset */
- b plat_panic_handler /* Undef */
-
-3:
- /* Expected encoding: 0x2 and 0x3 */
- tst sp, #1
- bic sp, sp, #0x7
- bne plat_panic_handler /* Data abort */
- b plat_panic_handler /* Reserved */
+++ /dev/null
-/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <asm_macros.S>
-
- .globl workaround_icache_inv_runtime_exceptions
-
-vector_base workaround_icache_inv_runtime_exceptions
- /* We encode the exception entry in the bottom 3 bits of SP */
- add sp, sp, #1 /* Reset: 0b111 */
- add sp, sp, #1 /* Undef: 0b110 */
- add sp, sp, #1 /* Syscall: 0b101 */
- add sp, sp, #1 /* Prefetch abort: 0b100 */
- add sp, sp, #1 /* Data abort: 0b011 */
- add sp, sp, #1 /* Reserved: 0b010 */
- add sp, sp, #1 /* IRQ: 0b001 */
- nop /* FIQ: 0b000 */
-
- /*
- * Invalidate the instruction cache, which we assume also
- * invalidates the branch predictor. This may depend on
- * other CPU specific changes (e.g. an ACTLR setting).
- */
- stcopr r0, ICIALLU
- isb
-
- /*
- * As we cannot use any temporary registers and cannot
- * clobber SP, we can decode the exception entry using
- * an unrolled binary search.
- *
- * Note, if this code is re-used by other secure payloads,
- * the below exception entry vectors must be changed to
- * the vectors specific to that secure payload.
- */
-
- tst sp, #4
- bne 1f
-
- tst sp, #2
- bne 3f
-
- /* Expected encoding: 0x1 and 0x0 */
- tst sp, #1
- /* Restore original value of SP by clearing the bottom 3 bits */
- bic sp, sp, #0x7
- bne plat_panic_handler /* IRQ */
- b sp_min_handle_fiq /* FIQ */
-
-1:
- /* Expected encoding: 0x4 and 0x5 */
- tst sp, #2
- bne 2f
-
- tst sp, #1
- bic sp, sp, #0x7
- bne sp_min_handle_smc /* Syscall */
- b plat_panic_handler /* Prefetch abort */
-
-2:
- /* Expected encoding: 0x7 and 0x6 */
- tst sp, #1
- bic sp, sp, #0x7
- bne sp_min_entrypoint /* Reset */
- b plat_panic_handler /* Undef */
-
-3:
- /* Expected encoding: 0x2 and 0x3 */
- tst sp, #1
- bic sp, sp, #0x7
- bne plat_panic_handler /* Data abort */
- b plat_panic_handler /* Reserved */
--- /dev/null
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __WA_CVE_2017_5715_H__
+#define __WA_CVE_2017_5715_H__
+
+int check_wa_cve_2017_5715(void);
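+
+/*
+ * Usage sketch, mirroring the SMCCC_ARCH_FEATURES handler updated at the
+ * end of this patch (any result other than ERRATA_NOT_APPLIES means the
+ * firmware mitigation is required):
+ *
+ *	if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)
+ *		return 1;
+ *	return 0;	// ERRATA_APPLIES || ERRATA_MISSING
+ */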
+
+#endif /* __WA_CVE_2017_5715_H__ */
+++ /dev/null
-/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef __WORKAROUND_CVE_2017_5715_H__
-#define __WORKAROUND_CVE_2017_5715_H__
-
-int check_workaround_cve_2017_5715(void);
-
-#endif /* __WORKAROUND_CVE_2017_5715_H__ */
#endif
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
- adr x0, workaround_mmu_runtime_exceptions
+ adr x0, wa_cve_2017_5715_mmu_vbar
msr vbar_el3, x0
#endif
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
cpu_check_csv2 x0, 1f
- adr x0, workaround_mmu_runtime_exceptions
+ adr x0, wa_cve_2017_5715_mmu_vbar
msr vbar_el3, x0
1:
#endif
func cortex_a73_reset_func
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
cpu_check_csv2 x0, 1f
- adr x0, workaround_bpiall_vbar0_runtime_exceptions
+ adr x0, wa_cve_2017_5715_bpiall_vbar
msr vbar_el3, x0
1:
#endif
func cortex_a75_reset_func
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
cpu_check_csv2 x0, 1f
- adr x0, workaround_bpiall_vbar0_runtime_exceptions
+ adr x0, wa_cve_2017_5715_bpiall_vbar
msr vbar_el3, x0
1:
#endif
#endif
/*
- * int check_workaround_cve_2017_5715(void);
+ * int check_wa_cve_2017_5715(void);
*
* This function returns:
* - ERRATA_APPLIES when firmware mitigation is required.
* NOTE: Must be called only after cpu_ops have been initialized
* in per-CPU data.
*/
- .globl check_workaround_cve_2017_5715
-func check_workaround_cve_2017_5715
+ .globl check_wa_cve_2017_5715
+func check_wa_cve_2017_5715
mrs x0, tpidr_el3
#if ENABLE_ASSERTIONS
cmp x0, #0
1:
mov x0, #ERRATA_NOT_APPLIES
ret
-endfunc check_workaround_cve_2017_5715
+endfunc check_wa_cve_2017_5715
--- /dev/null
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arm_arch_svc.h>
+#include <asm_macros.S>
+#include <context.h>
+
+ .globl wa_cve_2017_5715_bpiall_vbar
+
+#define EMIT_BPIALL 0xee070fd5
+#define EMIT_SMC 0xe1600070
+#define ESR_EL3_A64_SMC0 0x5e000000
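+
+/*
+ * For reference (standard Arm encodings, noted here as an aid to review):
+ * EMIT_BPIALL encodes the A32 instruction "mcr p15, 0, r0, c7, c5, 6"
+ * (BPIALL) and EMIT_SMC encodes "smc #0". ESR_EL3_A64_SMC0 is the ESR_EL3
+ * value for an SMC #0 taken from AArch64 (EC=0x17, IL=1, ISS=0).
+ */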
+
+ .macro apply_cve_2017_5715_wa _from_vector
+ /*
+ * Save register state to enable a call to AArch32 S-EL1 and return
+ * Identify the original calling vector in w2 (==_from_vector)
+ * Use w3-w6 for additional register state preservation while in S-EL1
+ */
+
+ /* Save GP regs */
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+ /* Identify the original exception vector */
+ mov w2, \_from_vector
+
+ /* Preserve 32-bit system registers in GP registers through the workaround */
+ mrs x3, esr_el3
+ mrs x4, spsr_el3
+ mrs x5, scr_el3
+ mrs x6, sctlr_el1
+
+ /*
+ * Preserve LR and ELR_EL3 registers in the GP regs context.
+ * Temporarily use the CTX_GPREG_SP_EL0 slot to preserve ELR_EL3
+ * through the workaround. This is OK because at this point the
+ * current state for this context's SP_EL0 is in the live system
+ * register, which is unmodified by the workaround.
+ */
+ mrs x7, elr_el3
+ stp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+ /*
+ * Load system registers for entry to S-EL1.
+ */
+
+ /* Mask all interrupts and set AArch32 Supervisor mode */
+ movz w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK)
+
+ /* Switch EL3 exception vectors while the workaround is executing. */
+ adr x9, wa_cve_2017_5715_bpiall_ret_vbar
+
+	/* Set up SCTLR_EL1 with MMU off and I$ on */
+ ldr x10, stub_sel1_sctlr
+
+ /* Land at the S-EL1 workaround stub */
+ adr x11, aarch32_stub
+
+ /*
+ * Setting SCR_EL3 to all zeroes means that the NS, RW
+ * and SMD bits are configured as expected.
+ */
+ msr scr_el3, xzr
+ msr spsr_el3, x8
+ msr vbar_el3, x9
+ msr sctlr_el1, x10
+ msr elr_el3, x11
+
+ eret
+ .endm
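+
+	/*
+	 * In outline, the round trip this macro sets up is (sketch):
+	 *	EL3 vector -> ERET to the S-EL1 stub (BPIALL; SMC #0)
+	 *	-> wa_cve_2017_5715_bpiall_ret_vbar (restore EL3 state)
+	 *	-> normal runtime exception handling.
+	 */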
+
+ /* ---------------------------------------------------------------------
+ * This vector table is used at runtime to enter the workaround at
+ * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions. If the workaround
+ * is not enabled, the existing runtime exception vector table is used.
+ * ---------------------------------------------------------------------
+ */
+vector_base wa_cve_2017_5715_bpiall_vbar
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_sync_exception_sp_el0
+ b sync_exception_sp_el0
+	nop	/* to force 8-byte alignment for the following stub */
+
+ /*
+ * Since each vector table entry is 128 bytes, we can store the
+ * stub context in the unused space to minimize memory footprint.
+ */
+stub_sel1_sctlr:
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+
+aarch32_stub:
+ .word EMIT_BPIALL
+ .word EMIT_SMC
+
+ check_vector_size bpiall_sync_exception_sp_el0
+
+vector_entry bpiall_irq_sp_el0
+ b irq_sp_el0
+ check_vector_size bpiall_irq_sp_el0
+
+vector_entry bpiall_fiq_sp_el0
+ b fiq_sp_el0
+ check_vector_size bpiall_fiq_sp_el0
+
+vector_entry bpiall_serror_sp_el0
+ b serror_sp_el0
+ check_vector_size bpiall_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_sync_exception_sp_elx
+ b sync_exception_sp_elx
+ check_vector_size bpiall_sync_exception_sp_elx
+
+vector_entry bpiall_irq_sp_elx
+ b irq_sp_elx
+ check_vector_size bpiall_irq_sp_elx
+
+vector_entry bpiall_fiq_sp_elx
+ b fiq_sp_elx
+ check_vector_size bpiall_fiq_sp_elx
+
+vector_entry bpiall_serror_sp_elx
+ b serror_sp_elx
+ check_vector_size bpiall_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_sync_exception_aarch64
+ apply_cve_2017_5715_wa 1
+ check_vector_size bpiall_sync_exception_aarch64
+
+vector_entry bpiall_irq_aarch64
+ apply_cve_2017_5715_wa 2
+ check_vector_size bpiall_irq_aarch64
+
+vector_entry bpiall_fiq_aarch64
+ apply_cve_2017_5715_wa 4
+ check_vector_size bpiall_fiq_aarch64
+
+vector_entry bpiall_serror_aarch64
+ apply_cve_2017_5715_wa 8
+ check_vector_size bpiall_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_sync_exception_aarch32
+ apply_cve_2017_5715_wa 1
+ check_vector_size bpiall_sync_exception_aarch32
+
+vector_entry bpiall_irq_aarch32
+ apply_cve_2017_5715_wa 2
+ check_vector_size bpiall_irq_aarch32
+
+vector_entry bpiall_fiq_aarch32
+ apply_cve_2017_5715_wa 4
+ check_vector_size bpiall_fiq_aarch32
+
+vector_entry bpiall_serror_aarch32
+ apply_cve_2017_5715_wa 8
+ check_vector_size bpiall_serror_aarch32
+
+ /* ---------------------------------------------------------------------
+ * This vector table is used while the workaround is executing. It
+ * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError
+ * workaround stubs to enter EL3 from S-EL1. It restores the previous
+ * EL3 state before proceeding with the normal runtime exception vector.
+ * ---------------------------------------------------------------------
+ */
+vector_base wa_cve_2017_5715_bpiall_ret_vbar
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_ret_sync_exception_sp_el0
+ b report_unhandled_exception
+ check_vector_size bpiall_ret_sync_exception_sp_el0
+
+vector_entry bpiall_ret_irq_sp_el0
+ b report_unhandled_interrupt
+ check_vector_size bpiall_ret_irq_sp_el0
+
+vector_entry bpiall_ret_fiq_sp_el0
+ b report_unhandled_interrupt
+ check_vector_size bpiall_ret_fiq_sp_el0
+
+vector_entry bpiall_ret_serror_sp_el0
+ b report_unhandled_exception
+ check_vector_size bpiall_ret_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_ret_sync_exception_sp_elx
+ b report_unhandled_exception
+ check_vector_size bpiall_ret_sync_exception_sp_elx
+
+vector_entry bpiall_ret_irq_sp_elx
+ b report_unhandled_interrupt
+ check_vector_size bpiall_ret_irq_sp_elx
+
+vector_entry bpiall_ret_fiq_sp_elx
+ b report_unhandled_interrupt
+ check_vector_size bpiall_ret_fiq_sp_elx
+
+vector_entry bpiall_ret_serror_sp_elx
+ b report_unhandled_exception
+ check_vector_size bpiall_ret_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_ret_sync_exception_aarch64
+ b report_unhandled_exception
+ check_vector_size bpiall_ret_sync_exception_aarch64
+
+vector_entry bpiall_ret_irq_aarch64
+ b report_unhandled_interrupt
+ check_vector_size bpiall_ret_irq_aarch64
+
+vector_entry bpiall_ret_fiq_aarch64
+ b report_unhandled_interrupt
+ check_vector_size bpiall_ret_fiq_aarch64
+
+vector_entry bpiall_ret_serror_aarch64
+ b report_unhandled_exception
+ check_vector_size bpiall_ret_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry bpiall_ret_sync_exception_aarch32
+	/*
+	 * w2 indicates which S-EL1 stub was run and thus which original
+	 * vector was used.
+	 * w3-w6 contain the saved system register state (esr_el3 in w3).
+	 * Restore the LR and ELR_EL3 register state from the GP regs context.
+	 */
+ ldp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+ /* Apply the restored system register state */
+ msr esr_el3, x3
+ msr spsr_el3, x4
+ msr scr_el3, x5
+ msr sctlr_el1, x6
+ msr elr_el3, x7
+
+	/*
+	 * The workaround is complete, so swap VBAR_EL3 back to the
+	 * workaround entry table in preparation for subsequent
+	 * Sync/IRQ/FIQ/SError exceptions.
+	 */
+ adr x0, wa_cve_2017_5715_bpiall_vbar
+ msr vbar_el3, x0
+
+	/*
+	 * Restore all GP regs except x2 (the original exception type)
+	 * and x3 (the saved esr_el3); both are still needed below.
+	 */
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+	/* Fast-path Sync exceptions; the static predictor will fall through. */
+ tbz w2, #0, workaround_not_sync
+
+	/*
+	 * Check whether the SMC came from AArch64 state via SMC #0
+	 * with W0 == SMCCC_ARCH_WORKAROUND_1.
+	 *
+	 * This sequence evaluates as:
+	 *	(W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing the use of a single branch operation.
+	 */
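+	/*
+	 * C equivalent (sketch; w3 holds the saved esr_el3):
+	 *	if (w0 == SMCCC_ARCH_WORKAROUND_1 && esr_el3 == ESR_EL3_A64_SMC0)
+	 *		return;	// ERET: fast path, nothing more to do
+	 */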
+ orr w2, wzr, #SMCCC_ARCH_WORKAROUND_1
+ cmp w0, w2
+ mov_imm w2, ESR_EL3_A64_SMC0
+ ccmp w3, w2, #0, eq
+ /* Static predictor will predict a fall through */
+ bne 1f
+ eret
+1:
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b sync_exception_aarch64
+ check_vector_size bpiall_ret_sync_exception_aarch32
+
+vector_entry bpiall_ret_irq_aarch32
+ b report_unhandled_interrupt
+
+ /*
+ * Post-workaround fan-out for non-sync exceptions
+ */
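+	/*
+	 * w2 is the one-hot vector code set by apply_cve_2017_5715_wa:
+	 *	1 = Sync, 2 = IRQ, 4 = FIQ, 8 = SError
+	 */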
+workaround_not_sync:
+ tbnz w2, #3, bpiall_ret_serror
+ tbnz w2, #2, bpiall_ret_fiq
+ /* IRQ */
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b irq_aarch64
+
+bpiall_ret_fiq:
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b fiq_aarch64
+
+bpiall_ret_serror:
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b serror_aarch64
+ check_vector_size bpiall_ret_irq_aarch32
+
+vector_entry bpiall_ret_fiq_aarch32
+ b report_unhandled_interrupt
+ check_vector_size bpiall_ret_fiq_aarch32
+
+vector_entry bpiall_ret_serror_aarch32
+ b report_unhandled_exception
+ check_vector_size bpiall_ret_serror_aarch32
--- /dev/null
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arm_arch_svc.h>
+#include <asm_macros.S>
+#include <context.h>
+
+ .globl wa_cve_2017_5715_mmu_vbar
+
+#define ESR_EL3_A64_SMC0 0x5e000000
+
+vector_base wa_cve_2017_5715_mmu_vbar
+
+ .macro apply_cve_2017_5715_wa _is_sync_exception
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ mrs x1, sctlr_el3
+ /* Disable MMU */
+ bic x1, x1, #SCTLR_M_BIT
+ msr sctlr_el3, x1
+ isb
+ /* Enable MMU */
+ orr x1, x1, #SCTLR_M_BIT
+ msr sctlr_el3, x1
+ /*
+ * Defer ISB to avoid synchronizing twice in case we hit
+ * the workaround SMC call which will implicitly synchronize
+ * because of the ERET instruction.
+ */
+
+	/*
+	 * Check whether the SMC came from AArch64 state via SMC #0
+	 * with W0 == SMCCC_ARCH_WORKAROUND_1.
+	 *
+	 * This sequence evaluates as:
+	 *	(W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
+	 * allowing the use of a single branch operation.
+	 */
+ .if \_is_sync_exception
+ orr w1, wzr, #SMCCC_ARCH_WORKAROUND_1
+ cmp w0, w1
+ mrs x0, esr_el3
+ mov_imm w1, ESR_EL3_A64_SMC0
+ ccmp w0, w1, #0, eq
+ /* Static predictor will predict a fall through */
+ bne 1f
+ eret
+1:
+ .endif
+
+ /*
+ * Synchronize now to enable the MMU. This is required
+ * to ensure the load pair below reads the data stored earlier.
+ */
+ isb
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ .endm
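+
+	/*
+	 * In outline (sketch): on the cores that install this vector table,
+	 * toggling SCTLR_EL3.M off and back on doubles as the branch
+	 * predictor invalidation; the ISB is deferred so the
+	 * SMCCC_ARCH_WORKAROUND_1 fast path can rely on the implicit
+	 * synchronization of ERET instead.
+	 */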
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_sp_el0
+ b sync_exception_sp_el0
+ check_vector_size mmu_sync_exception_sp_el0
+
+vector_entry mmu_irq_sp_el0
+ b irq_sp_el0
+ check_vector_size mmu_irq_sp_el0
+
+vector_entry mmu_fiq_sp_el0
+ b fiq_sp_el0
+ check_vector_size mmu_fiq_sp_el0
+
+vector_entry mmu_serror_sp_el0
+ b serror_sp_el0
+ check_vector_size mmu_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_sp_elx
+ b sync_exception_sp_elx
+ check_vector_size mmu_sync_exception_sp_elx
+
+vector_entry mmu_irq_sp_elx
+ b irq_sp_elx
+ check_vector_size mmu_irq_sp_elx
+
+vector_entry mmu_fiq_sp_elx
+ b fiq_sp_elx
+ check_vector_size mmu_fiq_sp_elx
+
+vector_entry mmu_serror_sp_elx
+ b serror_sp_elx
+ check_vector_size mmu_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=1
+ b sync_exception_aarch64
+ check_vector_size mmu_sync_exception_aarch64
+
+vector_entry mmu_irq_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=0
+ b irq_aarch64
+ check_vector_size mmu_irq_aarch64
+
+vector_entry mmu_fiq_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=0
+ b fiq_aarch64
+ check_vector_size mmu_fiq_aarch64
+
+vector_entry mmu_serror_aarch64
+ apply_cve_2017_5715_wa _is_sync_exception=0
+ b serror_aarch64
+ check_vector_size mmu_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry mmu_sync_exception_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=1
+ b sync_exception_aarch32
+ check_vector_size mmu_sync_exception_aarch32
+
+vector_entry mmu_irq_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=0
+ b irq_aarch32
+ check_vector_size mmu_irq_aarch32
+
+vector_entry mmu_fiq_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=0
+ b fiq_aarch32
+ check_vector_size mmu_fiq_aarch32
+
+vector_entry mmu_serror_aarch32
+ apply_cve_2017_5715_wa _is_sync_exception=0
+ b serror_aarch32
+ check_vector_size mmu_serror_aarch32
+++ /dev/null
-/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <arm_arch_svc.h>
-#include <asm_macros.S>
-#include <context.h>
-
- .globl workaround_bpiall_vbar0_runtime_exceptions
-
-#define EMIT_BPIALL 0xee070fd5
-#define EMIT_SMC 0xe1600070
-#define ESR_EL3_A64_SMC0 0x5e000000
-
- .macro enter_workaround _from_vector
- /*
- * Save register state to enable a call to AArch32 S-EL1 and return
- * Identify the original calling vector in w2 (==_from_vector)
- * Use w3-w6 for additional register state preservation while in S-EL1
- */
-
- /* Save GP regs */
- stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
- stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
- stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
- stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
- stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
- stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
- stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
- stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
- stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
- stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
- stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
- stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
- stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
- stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
- stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-
- /* Identify the original exception vector */
- mov w2, \_from_vector
-
- /* Preserve 32-bit system registers in GP registers through the workaround */
- mrs x3, esr_el3
- mrs x4, spsr_el3
- mrs x5, scr_el3
- mrs x6, sctlr_el1
-
- /*
- * Preserve LR and ELR_EL3 registers in the GP regs context.
- * Temporarily use the CTX_GPREG_SP_EL0 slot to preserve ELR_EL3
- * through the workaround. This is OK because at this point the
- * current state for this context's SP_EL0 is in the live system
- * register, which is unmodified by the workaround.
- */
- mrs x7, elr_el3
- stp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-
- /*
- * Load system registers for entry to S-EL1.
- */
-
- /* Mask all interrupts and set AArch32 Supervisor mode */
- movz w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK)
-
- /* Switch EL3 exception vectors while the workaround is executing. */
- adr x9, workaround_bpiall_vbar1_runtime_exceptions
-
- /* Setup SCTLR_EL1 with MMU off and I$ on */
- ldr x10, stub_sel1_sctlr
-
- /* Land at the S-EL1 workaround stub */
- adr x11, aarch32_stub
-
- /*
- * Setting SCR_EL3 to all zeroes means that the NS, RW
- * and SMD bits are configured as expected.
- */
- msr scr_el3, xzr
- msr spsr_el3, x8
- msr vbar_el3, x9
- msr sctlr_el1, x10
- msr elr_el3, x11
-
- eret
- .endm
-
- /* ---------------------------------------------------------------------
- * This vector table is used at runtime to enter the workaround at
- * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions. If the workaround
- * is not enabled, the existing runtime exception vector table is used.
- * ---------------------------------------------------------------------
- */
-vector_base workaround_bpiall_vbar0_runtime_exceptions
-
- /* ---------------------------------------------------------------------
- * Current EL with SP_EL0 : 0x0 - 0x200
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
- b sync_exception_sp_el0
- nop /* to force 8 byte alignment for the following stub */
-
- /*
- * Since each vector table entry is 128 bytes, we can store the
- * stub context in the unused space to minimize memory footprint.
- */
-stub_sel1_sctlr:
- .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
-
-aarch32_stub:
- .word EMIT_BPIALL
- .word EMIT_SMC
-
- check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0
-
-vector_entry workaround_bpiall_vbar0_irq_sp_el0
- b irq_sp_el0
- check_vector_size workaround_bpiall_vbar0_irq_sp_el0
-
-vector_entry workaround_bpiall_vbar0_fiq_sp_el0
- b fiq_sp_el0
- check_vector_size workaround_bpiall_vbar0_fiq_sp_el0
-
-vector_entry workaround_bpiall_vbar0_serror_sp_el0
- b serror_sp_el0
- check_vector_size workaround_bpiall_vbar0_serror_sp_el0
-
- /* ---------------------------------------------------------------------
- * Current EL with SP_ELx: 0x200 - 0x400
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx
- b sync_exception_sp_elx
- check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx
-
-vector_entry workaround_bpiall_vbar0_irq_sp_elx
- b irq_sp_elx
- check_vector_size workaround_bpiall_vbar0_irq_sp_elx
-
-vector_entry workaround_bpiall_vbar0_fiq_sp_elx
- b fiq_sp_elx
- check_vector_size workaround_bpiall_vbar0_fiq_sp_elx
-
-vector_entry workaround_bpiall_vbar0_serror_sp_elx
- b serror_sp_elx
- check_vector_size workaround_bpiall_vbar0_serror_sp_elx
-
- /* ---------------------------------------------------------------------
- * Lower EL using AArch64 : 0x400 - 0x600
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
- enter_workaround 1
- check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64
-
-vector_entry workaround_bpiall_vbar0_irq_aarch64
- enter_workaround 2
- check_vector_size workaround_bpiall_vbar0_irq_aarch64
-
-vector_entry workaround_bpiall_vbar0_fiq_aarch64
- enter_workaround 4
- check_vector_size workaround_bpiall_vbar0_fiq_aarch64
-
-vector_entry workaround_bpiall_vbar0_serror_aarch64
- enter_workaround 8
- check_vector_size workaround_bpiall_vbar0_serror_aarch64
-
- /* ---------------------------------------------------------------------
- * Lower EL using AArch32 : 0x600 - 0x800
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
- enter_workaround 1
- check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32
-
-vector_entry workaround_bpiall_vbar0_irq_aarch32
- enter_workaround 2
- check_vector_size workaround_bpiall_vbar0_irq_aarch32
-
-vector_entry workaround_bpiall_vbar0_fiq_aarch32
- enter_workaround 4
- check_vector_size workaround_bpiall_vbar0_fiq_aarch32
-
-vector_entry workaround_bpiall_vbar0_serror_aarch32
- enter_workaround 8
- check_vector_size workaround_bpiall_vbar0_serror_aarch32
-
- /* ---------------------------------------------------------------------
- * This vector table is used while the workaround is executing. It
- * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError
- * workaround stubs to enter EL3 from S-EL1. It restores the previous
- * EL3 state before proceeding with the normal runtime exception vector.
- * ---------------------------------------------------------------------
- */
-vector_base workaround_bpiall_vbar1_runtime_exceptions
-
- /* ---------------------------------------------------------------------
- * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0
- b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0
-
-vector_entry workaround_bpiall_vbar1_irq_sp_el0
- b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_irq_sp_el0
-
-vector_entry workaround_bpiall_vbar1_fiq_sp_el0
- b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_fiq_sp_el0
-
-vector_entry workaround_bpiall_vbar1_serror_sp_el0
- b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_serror_sp_el0
-
- /* ---------------------------------------------------------------------
- * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx
- b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx
-
-vector_entry workaround_bpiall_vbar1_irq_sp_elx
- b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_irq_sp_elx
-
-vector_entry workaround_bpiall_vbar1_fiq_sp_elx
- b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_fiq_sp_elx
-
-vector_entry workaround_bpiall_vbar1_serror_sp_elx
- b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_serror_sp_elx
-
- /* ---------------------------------------------------------------------
- * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_bpiall_vbar1_sync_exception_aarch64
- b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64
-
-vector_entry workaround_bpiall_vbar1_irq_aarch64
- b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_irq_aarch64
-
-vector_entry workaround_bpiall_vbar1_fiq_aarch64
- b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_fiq_aarch64
-
-vector_entry workaround_bpiall_vbar1_serror_aarch64
- b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_serror_aarch64
-
- /* ---------------------------------------------------------------------
- * Lower EL using AArch32 : 0x600 - 0x800
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
- /*
- * w2 indicates which SEL1 stub was run and thus which original vector was used
- * w3-w6 contain saved system register state (esr_el3 in w3)
- * Restore LR and ELR_EL3 register state from the GP regs context
- */
- ldp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-
- /* Apply the restored system register state */
- msr esr_el3, x3
- msr spsr_el3, x4
- msr scr_el3, x5
- msr sctlr_el1, x6
- msr elr_el3, x7
-
- /*
- * Workaround is complete, so swap VBAR_EL3 to point
- * to workaround entry table in preparation for subsequent
- * Sync/IRQ/FIQ/SError exceptions.
- */
- adr x0, workaround_bpiall_vbar0_runtime_exceptions
- msr vbar_el3, x0
-
- /*
- * Restore all GP regs except x2 and x3 (esr). The value in x2
- * indicates the type of the original exception.
- */
- ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
- ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
- ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
- ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
- ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
- ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
- ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
- ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
- ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
- ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
- ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
- ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
- ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
- ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-
- /* Fast path Sync exceptions. Static predictor will fall through. */
- tbz w2, #0, workaround_not_sync
-
- /*
- * Check if SMC is coming from A64 state on #0
- * with W0 = SMCCC_ARCH_WORKAROUND_1
- *
- * This sequence evaluates as:
- * (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
- * allowing use of a single branch operation
- */
- orr w2, wzr, #SMCCC_ARCH_WORKAROUND_1
- cmp w0, w2
- mov_imm w2, ESR_EL3_A64_SMC0
- ccmp w3, w2, #0, eq
- /* Static predictor will predict a fall through */
- bne 1f
- eret
-1:
- ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
- b sync_exception_aarch64
- check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
-
-vector_entry workaround_bpiall_vbar1_irq_aarch32
- b report_unhandled_interrupt
-
- /*
- * Post-workaround fan-out for non-sync exceptions
- */
-workaround_not_sync:
- tbnz w2, #3, workaround_bpiall_vbar1_serror
- tbnz w2, #2, workaround_bpiall_vbar1_fiq
- /* IRQ */
- ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
- b irq_aarch64
-
-workaround_bpiall_vbar1_fiq:
- ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
- b fiq_aarch64
-
-workaround_bpiall_vbar1_serror:
- ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
- b serror_aarch64
- check_vector_size workaround_bpiall_vbar1_irq_aarch32
-
-vector_entry workaround_bpiall_vbar1_fiq_aarch32
- b report_unhandled_interrupt
- check_vector_size workaround_bpiall_vbar1_fiq_aarch32
-
-vector_entry workaround_bpiall_vbar1_serror_aarch32
- b report_unhandled_exception
- check_vector_size workaround_bpiall_vbar1_serror_aarch32
+++ /dev/null
-/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <arm_arch_svc.h>
-#include <asm_macros.S>
-#include <context.h>
-
- .globl workaround_mmu_runtime_exceptions
-
-#define ESR_EL3_A64_SMC0 0x5e000000
-
-vector_base workaround_mmu_runtime_exceptions
-
- .macro apply_workaround _is_sync_exception
- stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
- mrs x1, sctlr_el3
- /* Disable MMU */
- bic x1, x1, #SCTLR_M_BIT
- msr sctlr_el3, x1
- isb
- /* Enable MMU */
- orr x1, x1, #SCTLR_M_BIT
- msr sctlr_el3, x1
- /*
- * Defer ISB to avoid synchronizing twice in case we hit
- * the workaround SMC call which will implicitly synchronize
- * because of the ERET instruction.
- */
-
- /*
- * Ensure SMC is coming from A64 state on #0
- * with W0 = SMCCC_ARCH_WORKAROUND_1
- *
- * This sequence evaluates as:
- * (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE)
- * allowing use of a single branch operation
- */
- .if \_is_sync_exception
- orr w1, wzr, #SMCCC_ARCH_WORKAROUND_1
- cmp w0, w1
- mrs x0, esr_el3
- mov_imm w1, ESR_EL3_A64_SMC0
- ccmp w0, w1, #0, eq
- /* Static predictor will predict a fall through */
- bne 1f
- eret
-1:
- .endif
-
- /*
- * Synchronize now to enable the MMU. This is required
- * to ensure the load pair below reads the data stored earlier.
- */
- isb
- ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
- .endm
-
- /* ---------------------------------------------------------------------
- * Current EL with SP_EL0 : 0x0 - 0x200
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_mmu_sync_exception_sp_el0
- b sync_exception_sp_el0
- check_vector_size workaround_mmu_sync_exception_sp_el0
-
-vector_entry workaround_mmu_irq_sp_el0
- b irq_sp_el0
- check_vector_size workaround_mmu_irq_sp_el0
-
-vector_entry workaround_mmu_fiq_sp_el0
- b fiq_sp_el0
- check_vector_size workaround_mmu_fiq_sp_el0
-
-vector_entry workaround_mmu_serror_sp_el0
- b serror_sp_el0
- check_vector_size workaround_mmu_serror_sp_el0
-
- /* ---------------------------------------------------------------------
- * Current EL with SP_ELx: 0x200 - 0x400
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_mmu_sync_exception_sp_elx
- b sync_exception_sp_elx
- check_vector_size workaround_mmu_sync_exception_sp_elx
-
-vector_entry workaround_mmu_irq_sp_elx
- b irq_sp_elx
- check_vector_size workaround_mmu_irq_sp_elx
-
-vector_entry workaround_mmu_fiq_sp_elx
- b fiq_sp_elx
- check_vector_size workaround_mmu_fiq_sp_elx
-
-vector_entry workaround_mmu_serror_sp_elx
- b serror_sp_elx
- check_vector_size workaround_mmu_serror_sp_elx
-
- /* ---------------------------------------------------------------------
- * Lower EL using AArch64 : 0x400 - 0x600
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_mmu_sync_exception_aarch64
- apply_workaround _is_sync_exception=1
- b sync_exception_aarch64
- check_vector_size workaround_mmu_sync_exception_aarch64
-
-vector_entry workaround_mmu_irq_aarch64
- apply_workaround _is_sync_exception=0
- b irq_aarch64
- check_vector_size workaround_mmu_irq_aarch64
-
-vector_entry workaround_mmu_fiq_aarch64
- apply_workaround _is_sync_exception=0
- b fiq_aarch64
- check_vector_size workaround_mmu_fiq_aarch64
-
-vector_entry workaround_mmu_serror_aarch64
- apply_workaround _is_sync_exception=0
- b serror_aarch64
- check_vector_size workaround_mmu_serror_aarch64
-
- /* ---------------------------------------------------------------------
- * Lower EL using AArch32 : 0x600 - 0x800
- * ---------------------------------------------------------------------
- */
-vector_entry workaround_mmu_sync_exception_aarch32
- apply_workaround _is_sync_exception=1
- b sync_exception_aarch32
- check_vector_size workaround_mmu_sync_exception_aarch32
-
-vector_entry workaround_mmu_irq_aarch32
- apply_workaround _is_sync_exception=0
- b irq_aarch32
- check_vector_size workaround_mmu_irq_aarch32
-
-vector_entry workaround_mmu_fiq_aarch32
- apply_workaround _is_sync_exception=0
- b fiq_aarch32
- check_vector_size workaround_mmu_fiq_aarch32
-
-vector_entry workaround_mmu_serror_aarch32
- apply_workaround _is_sync_exception=0
- b serror_aarch32
- check_vector_size workaround_mmu_serror_aarch32
#include <runtime_svc.h>
#include <smccc.h>
#include <smccc_helpers.h>
-#include <workaround_cve_2017_5715.h>
+#include <wa_cve_2017_5715.h>
static int32_t smccc_version(void)
{
return SMC_OK;
#if WORKAROUND_CVE_2017_5715
case SMCCC_ARCH_WORKAROUND_1:
- if (check_workaround_cve_2017_5715() == ERRATA_NOT_APPLIES)
+ if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)
return 1;
return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
#endif